int nr = smp_processor_id();
struct tss_struct *t = &init_tss[nr];
- if ( (ss == __HYPERVISOR_CS) || (ss == __HYPERVISOR_DS) )
- return -1;
+ if ( !VALID_DATASEL(ss) )
+ return -EINVAL;
current->thread.ss1 = ss;
current->thread.esp1 = esp;
*/
struct task_struct *do_newdomain(unsigned int dom_id, unsigned int cpu)
{
- int retval;
+ int retval, i;
struct task_struct *p = NULL;
unsigned long flags;
p->addr_limit = USER_DS;
p->active_mm = &p->mm;
+ /*
+ * We're basically forcing default RPLs to 1, so that our "what privilege
+ * level are we returning to?" logic works.
+ */
+ p->failsafe_selector = FLAT_RING1_CS;
+ p->event_selector = FLAT_RING1_CS;
+ p->thread.ss1 = FLAT_RING1_DS;
+ for ( i = 0; i < 256; i++ ) p->thread.traps[i].cs = FLAT_RING1_CS;
+
sched_add_domain(p);
INIT_LIST_HEAD(&p->pg_head);
/*
 * Load the CPU Task Register with the TSS selector for CPU n.
 * __TSS(n) is a GDT index; '<<3' converts it to a selector value
 * (index occupies bits 3-15; TI and RPL bits are left zero).
 */
#define load_TR(n) __asm__ __volatile__ ("ltr %%ax" : : "a" (__TSS(n)<<3) )
-/* Guest OS must provide its own code selectors, or use the one we provide. */
-#define VALID_CODESEL(_s) \
- ((((_s)>>2) >= FIRST_DOMAIN_GDT_ENTRY) || ((_s) == FLAT_RING1_CS))
+/*
+ * Guest OS must provide its own code selectors, or use the one we provide.
+ * The RPL must be 1, as we only create bounce frames to ring 1.
+ */
+#define VALID_CODESEL(_s) \
+ (((((_s)>>2) >= FIRST_DOMAIN_GDT_ENTRY) || ((_s) == FLAT_RING1_CS)) && \
+ (((_s)&3) == 1))
+
+#define VALID_DATASEL(_s) \
+ (((((_s)>>2) >= FIRST_DOMAIN_GDT_ENTRY) || ((_s) == FLAT_RING1_DS)) && \
+ (((_s)&3) == 1))
/* These are bitmasks for the first 32 bits of a descriptor table entry. */
#define _SEGMENT_TYPE (15<< 8)  /* 4-bit descriptor 'type' field, bits 8-11 */